Advanced Lane Finding Project

The goals / steps of this project are the following:

  • Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
  • Apply a distortion correction to raw images.
  • Use color transforms, gradients, etc., to create a thresholded binary image.
  • Apply a perspective transform to rectify binary image ("birds-eye view").
  • Detect lane pixels and fit to find the lane boundary.
  • Determine the curvature of the lane and vehicle position with respect to center.
  • Warp the detected lane boundaries back onto the original image.
  • Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.
  • Modularized the previous code

Camera Calibration

Step1: I'll compute the camera calibration using chessboard images

In [1]:
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
%matplotlib qt

Find the Chessboard Corners

In [3]:
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)

# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.

# Make a list of calibration images
images = glob.glob('./camera_cal/calibration*.jpg')

# Plot the result
fig, axs = plt.subplots(5, 4, figsize=(24, 10))  #axs[20]
axs = axs.flatten()

# fig.subplots_adjust(left=0.2, bottom=0.2, right=0.8, top=0.8, hspace=0.2, wspace=0.1)
fig.tight_layout()
# Step through the list and search for chessboard corners

serial_number = 0
for i, fname in enumerate(images):
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
    
    # If found, add object points, image points
    if ret == True:
        objpoints.append(objp)
        imgpoints.append(corners)

        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, (9,6), corners, ret)

    axs[i].axis("off")
    axs[i].imshow(img)
    axs[i].set_title(str(i), fontsize=10)

Compute the camera calibration

In [4]:
import pickle

# Test undistortion on an image
img = cv2.imread('./camera_cal/calibration10.jpg')
img_size = (img.shape[1], img.shape[0])

# Do camera calibration given object points and image points
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)

dst = cv2.undistort(img, mtx, dist, None, mtx)
cv2.imwrite('./camera_cal/test_undist_10.jpg',dst)

# Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
dist_pickle = {}
dist_pickle["mtx"] = mtx
dist_pickle["dist"] = dist
dist_pickle["ret"] = ret
dist_pickle["rvecs"] = rvecs
dist_pickle["tvecs"] = tvecs
pickle.dump( dist_pickle, open( "./camera_cal/wide_dist_pickle.p", "wb" ) )
#dst = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)
# Visualize undistortion
fig, axs = plt.subplots(1, 2, figsize=(20,10))
axs[0].imshow(img)
axs[0].set_title('Original Image', fontsize=30)
axs[1].imshow(dst)
axs[1].set_title('Undistorted Image', fontsize=30)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)

Step2: I'll apply a distortion correction to raw images.

In [5]:
pkl_file = open( "./camera_cal/wide_dist_pickle.p", "rb" )
dist_pickle = pickle.load( pkl_file )
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
# print('mtx=', mtx)
# print('dis=',dist)
# dst = cv2.undistort(img, mtx, dist, None, mtx)
# cv2.imwrite('calibration_wide/test_undist.jpg',dst)

# Plot the result
fig, axs = plt.subplots(5, 4, figsize=(24, 10))  #axs[20]
axs = axs.flatten()

# fig.subplots_adjust(left=0.2, bottom=0.2, right=0.8, top=0.8, hspace=0.2, wspace=0.1)
fig.tight_layout()
# Step through the list and search for chessboard corners

for i, fname in enumerate(images):
    img = cv2.imread(fname)
    
    dst = cv2.undistort(img, mtx, dist, None, mtx)
#     cv2.imwrite('calibration_wide/test_undist.jpg',dst)
    
    axs[i].axis("off")
    axs[i].imshow(dst)
    axs[i].set_title(str(i), fontsize=10)

Pipeline(test images)

Processing test images

In [7]:
import os
images_name = os.listdir('./test_images')
# print(images_name)
for i, fname in enumerate(images_name):
    img = cv2.imread('./test_images/' + fname)
    dst = cv2.undistort(img, mtx, dist, None, mtx)
    cv2.imwrite('./output_images/output_images_undistort/undistort_' + fname, dst)  #distortion correction of the picture, then save

img = cv2.imread('./test_images/test1.jpg')
undis_img = cv2.imread('./output_images/output_images_undistort/undistort_test1.jpg')
# Visualize undistortion of test images
fig, axs = plt.subplots(1, 2, figsize=(20,10))
axs[0].imshow(img[:,:,::-1])
axs[0].set_title('Original Image', fontsize=30)
axs[1].imshow(undis_img[:,:,::-1])
axs[1].set_title('Undistorted Image', fontsize=30)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)

Step3: I'll use color transforms, gradients, etc., to create a thresholded binary image.

In [8]:
# Read the undistort_straight_lines1.jpg
img = cv2.imread('./output_images/output_images_undistort/undistort_straight_lines1.jpg') 

# Define a function that thresholds the S-channel of HLS
def binary_select(image, s_thresh=(170, 255), sx_thresh=(20, 100)):
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS) #Convert to HLS color space
    s_channel = hls[:,:,2] #Apply a threshold to the S channel
    
    # Threshold color channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 255

    
    # Sobel x
    sobelx = cv2.Sobel(s_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
    abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    
    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 255
    
    #the threshold result of combine s_binary and sxbinary
    binary_output = np.zeros_like(s_channel)
    binary_output[(s_binary >= 255) & (sxbinary >= 255)] = 255
    
    return s_channel, s_binary, sxbinary, binary_output 
    
s_channel, s_binary, sxbinary, binary_output = binary_select(img, s_thresh=(90, 255), sx_thresh=(10, 150))

# Plot the result
fig, axs = plt.subplots(2, 3, figsize=(24, 9))
fig.tight_layout()
axs = axs.flatten()
axs[0].imshow(img[:,:,::-1])
axs[0].set_title('Original Image', fontsize=20)
axs[1].imshow(s_channel, cmap='gray')
axs[1].set_title('Thresholded S', fontsize=20)
axs[2].imshow(s_binary, cmap='gray')
axs[2].set_title('s_binary', fontsize=20)
axs[3].imshow(sxbinary, cmap='gray')
axs[3].set_title('sxbinary', fontsize=20)
axs[4].imshow(binary_output, cmap='gray')
axs[4].set_title('binary_output', fontsize=20)
plt.subplots_adjust(left=0., right=1, top=1.1, bottom=0.)

import os
images_name = os.listdir('./output_images/output_images_undistort')
# print(images_name)
for i, fname in enumerate(images_name):
    if fname.split('.')[-1] == 'jpg':
        img = cv2.imread('./output_images/output_images_undistort/' + fname)
        s_channel, s_binary, sxbinary, binary_output = binary_select(img, s_thresh=(90, 255), sx_thresh=(10, 150))
        cv2.imwrite('./output_images/output_images_binary_undistort/binary_' + fname, binary_output)  #create a thresholded binary image, then save?

Step4: I'll apply a perspective transform to rectify binary image ("birds-eye view").

In [26]:
# Read in the saved camera matrix and distortion coefficients
pkl_file = open("./camera_cal/wide_dist_pickle.p", "rb")
dist_pickle = pickle.load(pkl_file)
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]

# the function of "birds-eye view"
def birds_eye_warp(img, src, dst): #src: 4 source points / dst: 4 destination points
    h,w = img.shape[:2]
    # use cv2.getPerspectiveTransform() to get M, the transform matrix, and Minv, the inverse
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    # use cv2.warpPerspective() to warp your image to a top-down view
    warped = cv2.warpPerspective(img, M, (w,h), flags=cv2.INTER_LINEAR)
    return warped, M, Minv


# Read in an image
img = cv2.imread('./output_images/output_images_undistort/undistort_straight_lines1.jpg') 


h,w = img.shape[:2]

# define source and destination points for transform
src = np.float32([[575,464], [707,464], 
                  [1049,682],[258,682]])

dst = np.float32([[150,0],[w-150,0],
                  [w-150,h],[150,h]])

# #test which are the best points in src and dst

x_src = []
y_src = []
x_dst = []
y_dst = []
x_vertices_quad = []
y_vertices_quad = []

for i in range(4):
    x_src.append(src[i][0])
    y_src.append(src[i][1])
x_src.append(src[0][0])
y_src.append(src[0][1])

for i in range(4):
    x_dst.append(dst[i][0])
    y_dst.append(dst[i][1])
x_dst.append(dst[0][0])
y_dst.append(dst[0][1])


top_down, perspective_M, perspective_Minv = birds_eye_warp(img, src, dst)
fig, axs = plt.subplots(1, 2, figsize=(24, 9))
fig.tight_layout()
axs = axs.flatten()
axs[0].imshow(img[:,:,::-1])
axs[0].set_title('Original Image', fontsize=20)
axs[1].imshow(top_down[:,:,::-1])
axs[1].set_title('Undistorted and Warped Image', fontsize=20)
axs[1].plot(x_dst, y_dst, 'b--', lw=4)
axs[0].plot(x_src, y_src, 'g--', lw=4)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-26-1faba6a3450c> in <module>
     52 
     53 top_down, perspective_M, perspective_Minv = birds_eye_warp(img, src, dst)
---> 54 fig, axs = plt.subplots(1, 2, figsize=(24, 9))
     55 fig.tight_layout()
     56 axs = axs.flatten()

G:\ProgramData\Miniconda3\envs\tf130\lib\site-packages\matplotlib\pyplot.py in subplots(nrows, ncols, sharex, sharey, squeeze, subplot_kw, gridspec_kw, **fig_kw)
   1201         gridspec_kw = {}
   1202 
-> 1203     fig = figure(**fig_kw)
   1204     gs = GridSpec(nrows, ncols, **gridspec_kw)
   1205 

G:\ProgramData\Miniconda3\envs\tf130\lib\site-packages\matplotlib\pyplot.py in figure(num, figsize, dpi, facecolor, edgecolor, frameon, FigureClass, **kwargs)
    533                                         frameon=frameon,
    534                                         FigureClass=FigureClass,
--> 535                                         **kwargs)
    536 
    537         if figLabel:

G:\ProgramData\Miniconda3\envs\tf130\lib\site-packages\matplotlib\backends\backend_qt5agg.py in new_figure_manager(num, *args, **kwargs)
     42     FigureClass = kwargs.pop('FigureClass', Figure)
     43     thisFig = FigureClass(*args, **kwargs)
---> 44     return new_figure_manager_given_figure(num, thisFig)
     45 
     46 

G:\ProgramData\Miniconda3\envs\tf130\lib\site-packages\matplotlib\backends\backend_qt5agg.py in new_figure_manager_given_figure(num, figure)
     49     Create a new figure manager instance for the given figure.
     50     """
---> 51     canvas = FigureCanvasQTAgg(figure)
     52     return FigureManagerQT(canvas, num)
     53 

G:\ProgramData\Miniconda3\envs\tf130\lib\site-packages\matplotlib\backends\backend_qt5agg.py in __init__(self, figure)
    240         if DEBUG:
    241             print('FigureCanvasQtAgg: ', figure)
--> 242         super(FigureCanvasQTAgg, self).__init__(figure=figure)
    243         self._drawRect = None
    244         self.blitbox = []

G:\ProgramData\Miniconda3\envs\tf130\lib\site-packages\matplotlib\backends\backend_qt5agg.py in __init__(self, figure)
     64 
     65     def __init__(self, figure):
---> 66         super(FigureCanvasQTAggBase, self).__init__(figure=figure)
     67         self._agg_draw_pending = False
     68 

G:\ProgramData\Miniconda3\envs\tf130\lib\site-packages\matplotlib\backends\backend_qt5.py in __init__(self, figure)
    238         # The need for this change is documented here
    239         # http://pyqt.sourceforge.net/Docs/PyQt5/pyqt4_differences.html#cooperative-multi-inheritance
--> 240         super(FigureCanvasQT, self).__init__(figure=figure)
    241         self.figure = figure
    242         self.setMouseTracking(True)

TypeError: 'figure' is an unknown keyword argument
In [12]:
images_name = os.listdir('./output_images/output_images_undistort')

for i, fname in enumerate(images_name):
    if fname.split('.')[-1] == 'jpg':
        img = cv2.imread('./output_images/output_images_undistort/' + fname)
        top_down, perspective_M, perspective_Minv = birds_eye_warp(img, src, dst)
        cv2.imwrite('./output_images/output_images_bird_eye/birds_eye_' + fname, top_down)  #create a thresholded binary image, then save?

images_name = os.listdir('./output_images/output_images_binary_undistort')

for i, fname in enumerate(images_name):
    if fname.split('.')[-1] == 'jpg':
        img = cv2.imread('./output_images/output_images_binary_undistort/' + fname)
        top_down, perspective_M, perspective_Minv = birds_eye_warp(img, src, dst)
        cv2.imwrite('./output_images/output_images_binary_undistort_bird_eye/birds_eye_binary' + fname, top_down)  #create a thresholded binary image, then save?

# output_images_binary_undistort_bird_eye

Step5: Detect lane pixels and fit to find the lane boundary.

Detect lane pixels by Histogram Peaks(just test)

In [14]:
import numpy as np
import matplotlib.pyplot as plt
import cv2

# Load our image
binary_warped = cv2.imread('./output_images/output_images_binary_undistort_bird_eye/birds_eye_binarybinary_undistort_straight_lines1.jpg',0)
binary_warped = binary_warped/255
def hist(img):
    # TO-DO: Grab only the bottom half of the image
    # Lane lines are likely to be mostly vertical nearest to the car
    bottom_half = None
    bottom_half = img[img.shape[0]//2:,]
    # TO-DO: Sum across image pixels vertically - make sure to set `axis`
    # i.e. the highest areas of vertical lines should be larger values
    histogram = None
    histogram = np.sum(bottom_half, axis = 0)
    return histogram

# Create histogram of image binary activations
histogram = hist(binary_warped)

# Visualize the resulting histogram
plt.plot(histogram)
Out[14]:
[<matplotlib.lines.Line2D at 0x188c7ce7fd0>]

Implement Sliding Windows and Fit a Polynomial

In [15]:
# Load our image - this should be a new frame since last time!
# binary_warped = mpimg.imread('warped_example.jpg')

binary_warped = cv2.imread('./output_images/output_images_binary_undistort_bird_eye/birds_eye_binarybinary_undistort_straight_lines1.jpg',0)

def find_lane_pixels(binary_warped):
    # Take a histogram of the bottom half of the image
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0) #WHC
    # Create an output image to draw on and visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))
    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    midpoint = np.int(histogram.shape[0]//2) #W/2
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    # HYPERPARAMETERS
    # Choose the number of sliding windows
    nwindows = 9
    # Set the width of the windows +/- margin
    margin = 100
    # Set minimum number of pixels found to recenter window
    minpix = 50

    # Set height of windows - based on nwindows above and image shape
    window_height = np.int(binary_warped.shape[0]//nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0]) #H
    nonzerox = np.array(nonzero[1]) #W
    # Current positions to be updated later for each window in nwindows
    leftx_current = leftx_base
    rightx_current = rightx_base

    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []

    # Step through the windows one by one
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        
        # Identify the nonzero pixels in x and y within the window #
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & 
        (nonzerox >= win_xleft_low) &  (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & 
        (nonzerox >= win_xright_low) &  (nonzerox < win_xright_high)).nonzero()[0]
        
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        
        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:        
            rightx_current = np.int(np.mean(nonzerox[good_right_inds]))

    # Concatenate the arrays of indices (previously was a list of lists of pixels)
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Avoids an error if the above is not implemented fully
        pass
    
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds] 
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    return leftx, lefty, rightx, righty, out_img


def fit_polynomial(binary_warped):
    # Find our lane pixels first
    leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped)

    # Fit a second order polynomial to each using `np.polyfit`
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)

    # Generate x and y values for plotting
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
    try:
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    except TypeError:
        # Avoids an error if `left` and `right_fit` are still none or incorrect
        print('The function failed to fit a line!')
        left_fitx = 1*ploty**2 + 1*ploty
        right_fitx = 1*ploty**2 + 1*ploty

    ## Visualization ##
    # Colors in the left and right lane regions
    out_img[lefty, leftx] = [255, 0, 0]
    out_img[righty, rightx] = [0, 0, 255]
    window_img = np.zeros_like(out_img)
    # Generate a polygon to illustrate the search window area
    # And recast the x and y points into usable format for cv2.fillPoly()
    
    margin = 20
    line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
    line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, 
                          ploty])))])
    line_pts = np.hstack((line_window1, line_window2))


    # Draw the lane onto the warped blank image
    cv2.fillPoly(window_img, np.int_([line_pts]), (0,255, 0))
    
    result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)

    return result, left_fitx, right_fitx, ploty, left_fit, right_fit


result, left_fitx, right_fitx, ploty, left_fit, right_fit = fit_polynomial(binary_warped)


fig, axs = plt.subplots(1, 2, figsize=(24, 9))
fig.tight_layout()
axs = axs.flatten()
axs[0].imshow(binary_warped, cmap='gray')
axs[0].set_title('Original Image', fontsize=20)
axs[1].imshow(result)


# Plots the left and right polynomials on the lane lines
axs[1].plot(left_fitx, ploty, color='yellow')
axs[1].plot(right_fitx, ploty, color='yellow')
axs[1].set_title('Undistorted and Warped Image', fontsize=20)

plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)

Step6: Determine the curvature of the lane and vehicle position with respect to center.

In [17]:
def measure_curvature_real(left_fitx, right_fitx, ploty, left_fit, right_fit):
    '''
    Calculates the curvature of polynomial functions in meters.
    '''
    # Define conversions in x and y from pixels space to meters 
    ym_per_pix = 16.0/720 # meters per pixel in y dimension
    xm_per_pix = 3.7/1000 # meters per pixel in x dimension
    
    leftx = left_fitx*xm_per_pix
    rightx = right_fitx*xm_per_pix
    ploty = ploty*ym_per_pix
    
    left_fit_cr = np.polyfit(ploty, leftx, 2)
    right_fit_cr = np.polyfit(ploty, rightx, 2)
    # Define y-value where we want radius of curvature
    # We'll choose the maximum y-value, corresponding to the bottom of the image
    y_eval = np.max(ploty)
    
    ##### TO-DO: Implement the calculation of R_curve (radius of curvature) #####
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
    
    return left_curverad, right_curverad

# Calculate the radius of curvature in meters for both lane lines
left_curverad, right_curverad = measure_curvature_real(left_fitx, right_fitx, ploty, left_fit, right_fit)

print('left_curverad =', left_curverad, 'm;', 'right_curverad=', right_curverad, 'm')
# Should see values of 533.75 and 648.16 here, if using
# the default `generate_data` function with given seed number
left_curverad = 2360.29373141 m; right_curverad= 472.677348606 m

Step7: Warp the detected lane boundaries back onto the original image.

In [18]:
# print(perspective_Minv, '\n\n', perspective_M)
h, w = result.shape[:2]
print(result.shape)

warped_Minv = cv2.warpPerspective(result, perspective_Minv, (w,h), flags=cv2.INTER_LINEAR)
plt.imshow(warped_Minv)
img = cv2.imread('./output_images/output_images_undistort/undistort_straight_lines1.jpg')
img_warped_Minv = cv2.addWeighted(img, 1, warped_Minv, 1, 0)
plt.imshow(img_warped_Minv[:,:,::-1])
(720, 1280, 3)
Out[18]:
<matplotlib.image.AxesImage at 0x188c540f668>

Step8: Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.

In [19]:
# Calculate the position of the vehicle

leftx_int = left_fit[0]*720**2 + left_fit[1]*720 + left_fit[2]
rightx_int = right_fit[0]*720**2 + right_fit[1]*720 + right_fit[2]
center = abs(w/2 - ((rightx_int+leftx_int)/2))
plt.imshow(img_warped_Minv[:,:,::-1])

plt.text(50, 50, 'Radius of curvature = {}(m)'.format(int((left_curverad + right_curverad)/2)),
         style='italic', color='white', fontsize=10)
plt.text(50, 100, 'Vehicle is {:.2f}m left of center'.format(center*3.7/700),
         style='italic', color='white', fontsize=10)
Out[19]:
<matplotlib.text.Text at 0x188c6c4b860>

Pipeline(video)

Step9: In piplie(video), I modularized the previous code

In [22]:
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
from moviepy.editor import VideoFileClip

import pickle

from ipywidgets import interact, interactive, fixed
from moviepy.editor import VideoFileClip
from IPython.display import HTML

%matplotlib qt
In [23]:
# Define a class to receive the characteristics of each line detection
class Line():
    def __init__(self):
        # was the line detected in the last iteration?
        self.detected = False  
        # x values of the last n fits of the line
        self.recent_xfitted = [] 
        #average x values of the fitted line over the last n iterations
        self.bestx = None     
        #polynomial coefficients averaged over the last n iterations
        self.best_fit = None  
        #polynomial coefficients for the most recent fit
        self.current_fit = []  
        #radius of curvature of the line in some units
        self.radius_of_curvature = None 
        #distance in meters of vehicle center from the line
        self.line_base_pos = None 
        #difference in fit coefficients between last and new fits
        self.diffs = np.array([0,0,0], dtype='float') 
        #x values for detected line pixels
        self.allx = None
        #y values for detected line pixels
        self.ally = None
    def add_fit(self, fit):
        # add a found fit to the line, up to n
        if fit is not None:
            if self.best_fit is not None:
                # if we have a best fit, see how this new fit compares
                self.diffs = abs(fit-self.best_fit)
            if (self.diffs[0] > 0.001 or \
               self.diffs[1] > 1.0 or \
               self.diffs[2] > 100.) and \
               len(self.current_fit) > 0:
                # bad fit! abort! abort! ... well, unless there are no fits in the current_fit queue, then we'll take it
                self.detected = False
            else:
                self.detected = True
                self.current_fit.append(fit)
                if len(self.current_fit) > 5:
                    # throw out old fits, keep newest n
                    self.current_fit = self.current_fit[len(self.current_fit)-5:]
                self.best_fit = np.average(self.current_fit, axis=0)
        # or remove one from the history, if not found
        else:
            self.detected = False
            if len(self.current_fit) > 0:
                # throw out oldest fit
                self.current_fit = self.current_fit[:len(self.current_fit)-1]
            if len(self.current_fit) > 0:
                # if there are still any fits in the queue, best_fit is their average
                self.best_fit = np.average(self.current_fit, axis=0)
In [ ]:
# Define the complete image processing pipeline, reads raw image and returns binary image with lane lines identified
# (hopefully)

# Define a function that thresholds the L-channel of HLS
def hls_lthresh(img, thresh=(220, 255)):
    # Convert to HLS color space
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    hls_l = hls[:,:,1]
    hls_l = hls_l*(255/np.max(hls_l))
    # Apply a threshold to the L channel
    binary_output = np.zeros_like(hls_l)
    binary_output[(hls_l > thresh[0]) & (hls_l <= thresh[1])] = 1
    # Return a binary image of threshold result
    return binary_output
# Define a function that thresholds the S-channel of HLS
def hls_sthresh(img, thresh=(170, 255)):
    # Convert to HLS color space
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    hls_s = hls[:,:,2]
    hls_s = hls_s*(255/np.max(hls_l))
    # Apply a threshold to the S channel
    binary_output = np.zeros_like(hls_s)
    binary_output[(hls_s > thresh[0]) & (hls_s <= thresh[1])] = 1
    # Return a binary image of threshold result
    return binary_output
# Define a function that thresholds the B-channel of LAB
def lab_bthresh(img, thresh=(190,255)):
    # Convert to LAB color space
    lab = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)
    lab_b = lab[:,:,2]
    # don't normalize if there are no yellows in the image
    if np.max(lab_b) > 175:
        lab_b = lab_b*(255/np.max(lab_b))
    # Apply a threshold to the L channel
    binary_output = np.zeros_like(lab_b)
    binary_output[((lab_b > thresh[0]) & (lab_b <= thresh[1]))] = 1
    return binary_output
# def abs_sobel_thresh(img, orient='x', thresh_min=0, thresh_max=255):
def abs_sobel_thresh(img, thresh=(0,255), orient='x'):
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Apply x or y gradient with the OpenCV Sobel() function and take the absolute value
    if orient == 'x':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
    if orient == 'y':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1))
    # Rescale back to 8 bit integer
    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
    # Create a copy and apply the threshold
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel > thresh[0]) & (scaled_sobel <= thresh[1])] = 1

    return binary_output

# Define a function to threshold an image for a given range and Sobel kernel
def dir_threshold(img, thresh=(0,255)):
    # Grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Calculate the x and y gradients
    
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)#kernel = 3
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1)
    # Take the absolute value of the gradient direction, apply a threshold, and create a binary image result
    absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
    binary_output =  np.zeros_like(absgraddir)
    binary_output[(absgraddir > thresh[0]) & (absgraddir <= thresh[1])] = 1

    # Return the binary image
    return binary_output

# combine HLS and sobelx
def binary_select(img, s_thresh=(170, 255), sx_thresh=(20, 100)):
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS) #Convert to HLS color space
    hls_s = hls[:,:,2] #Apply a threshold to the S channel
    
    # Threshold color channel
    s_binary = np.zeros_like(hls_s)
    s_binary[(hls_s > s_thresh[0]) & (hls_s <= s_thresh[1])] = 1

    # Sobel x
    sobelx = cv2.Sobel(hls_s, cv2.CV_64F, 1, 0) # Take the derivative in x
    abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    
    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel > sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
    
    #the threshold result of combine s_binary and sxbinary
    binary_output = np.zeros_like(hls_s)
    binary_output[(s_binary >= 1) & (sxbinary >= 1)] = 1
    
    return binary_output 


def threshold_warp(img_undistort):
    
    # define source and destination points for transform    
    h,w = img_undistort.shape[:2]
    src = np.float32([[575,464], [707,464], 
                      [1049,682],[258,682]])
    dst = np.float32([[150,0],[w-150,0],
                      [w-150,h],[150,h]])
    # Perspective Transform
    img_unwarp, M, Minv = unwarp(img_undistort, src, dst)

    # HLS L-channel Threshold (using default parameters)
    img_LThresh = hls_lthresh(img_unwarp)

    # Lab B-channel Threshold (using default parameters)
    img_BThresh = lab_bthresh(img_unwarp)
    
    # Combine HLS and Lab B channel thresholds
    combined = np.zeros_like(img_BThresh)
    combined[(img_LThresh == 1) | (img_BThresh == 1)] = 1

    return combined, Minv
def process_image(img):
    # Undistort
    img_undistort = undistort(img) 
    new_img = np.copy(img)
    
    img_bin, Minv = threshold_warp(new_img)

    # if both left and right lines were detected last frame, use polyfit_using_prev_fit, otherwise use sliding window
    if not l_line.detected or not r_line.detected:
        l_fit, r_fit, l_lane_inds, r_lane_inds, _ = sliding_window_polyfit(img_bin)
    else:
        l_fit, r_fit, l_lane_inds, r_lane_inds = polyfit_using_prev_fit(img_bin, l_line.best_fit, r_line.best_fit)
        
    # invalidate both fits if the difference in their x-intercepts isn't around 900 px (+/- 150 px)
    if l_fit is not None and r_fit is not None:
        # calculate x-intercept (bottom of image, x=image_height) for fits
        h = img.shape[0]
        l_fit_x_int = l_fit[0]*h**2 + l_fit[1]*h + l_fit[2]
        r_fit_x_int = r_fit[0]*h**2 + r_fit[1]*h + r_fit[2]
        x_int_diff = abs(r_fit_x_int-l_fit_x_int)
        if abs(900 - x_int_diff) > 150:
            l_fit = None
            r_fit = None
            
    l_line.add_fit(l_fit)
    r_line.add_fit(r_fit)    
    # draw the current best fit if it exists
    if l_line.best_fit is not None and r_line.best_fit is not None:
        img_out_lane = draw_lane(new_img, img_bin, l_line.best_fit, r_line.best_fit, Minv)
        rad_l, rad_r, d_center = calc_curv_rad_and_center_dist(img_bin, l_line.best_fit, r_line.best_fit, 
                                                               l_lane_inds, r_lane_inds)
        img_out = draw_data(img_out_lane, (rad_l+rad_r)/2, d_center)
    else:
        img_out = new_img
    
    return img_out
In [28]:
def unwarp(img, src, dst):
    h,w = img.shape[:2]
    # use cv2.getPerspectiveTransform() to get M, the transform matrix, and Minv, the inverse
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    # use cv2.warpPerspective() to warp your image to a top-down view
    warped = cv2.warpPerspective(img, M, (w,h), flags=cv2.INTER_LINEAR)
    return warped, M, Minv

# undistort image using camera calibration matrix from above
def undistort(img):
    # Read in the saved camera matrix and distortion coefficients
    pkl_file = open("./camera_cal/wide_dist_pickle.p", "rb")
    dist_pickle = pickle.load(pkl_file)
    mtx = dist_pickle["mtx"]
    dist = dist_pickle["dist"]
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    return undist

def plot_fit_onto_img(img, fit, plot_color):
    if fit is None:
        return img
    new_img = np.copy(img)
    h = new_img.shape[0]
    ploty = np.linspace(0, h-1, h)
    plotx = fit[0]*ploty**2 + fit[1]*ploty + fit[2]
    pts = np.array([np.transpose(np.vstack([plotx, ploty]))])
    cv2.polylines(new_img, np.int32([pts]), isClosed=False, color=plot_color, thickness=8)
    return new_img

def draw_data(original_img, curv_rad, center_dist):    
    new_img = np.copy(original_img)
    h = new_img.shape[0]
    font = cv2.FONT_HERSHEY_DUPLEX
    text = 'Radius of curvature = ' + '{:04.2f}'.format(curv_rad) + '(m)'
    cv2.putText(new_img, text, (40,70), font, 1.5, (255,255,255), 2, cv2.LINE_AA)
    direction = ''
    if center_dist > 0:
        direction = 'right'
    elif center_dist < 0:
        direction = 'left'
    abs_center_dist = abs(center_dist)
    text = 'Vehicle is '+'{:04.3f}'.format(abs_center_dist) + '(m) ' + direction + ' of center'
    cv2.putText(new_img, text, (40,120), font, 1.5, (255,255,255), 2, cv2.LINE_AA)
    return new_img

# img_out1 = draw_lane(new_img, img_bin, l_line.best_fit, r_line.best_fit, Minv)
def draw_lane(original_img, binary_img, l_fit, r_fit, Minv):
    new_img = np.copy(original_img)
    if l_fit is None or r_fit is None:
        return original_img
    # Create an image to draw the lines on
    warp_zero = np.zeros_like(binary_img).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    
    h,w = binary_img.shape[:2]
    ploty = np.linspace(0, h-1, num=h)# to cover same y-range as image
    left_fitx = l_fit[0]*ploty**2 + l_fit[1]*ploty + l_fit[2]
    right_fitx = r_fit[0]*ploty**2 + r_fit[1]*ploty + r_fit[2]

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))

    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
    cv2.polylines(color_warp, np.int32([pts_left]), isClosed=False, color=(255,0,255), thickness=15)
    cv2.polylines(color_warp, np.int32([pts_right]), isClosed=False, color=(0,255,255), thickness=15)

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (w, h)) 
    # Combine the result with the original image
    result = cv2.addWeighted(new_img, 1, newwarp, 0.5, 0)
    return result

# Method to determine radius of curvature and distance from lane center 
# based on binary image, polynomial fit, and L and R lane pixel indices
      
# calc_curv_rad_and_center_dist(img_bin, l_line.best_fit, r_line.best_fit, l_lane_inds, r_lane_inds)
def calc_curv_rad_and_center_dist(bin_img, l_fit, r_fit, l_lane_inds, r_lane_inds):

    # Define conversions in x and y from pixels space to meters
#     ym_per_pix = 30/720 # meters per pixel in y dimension
#     xm_per_pix = 3.7/700 # meters per pixel in x dimension
    ym_per_pix = 16.0/720 # meters per pixel in y dimension
    xm_per_pix = 3.7/1000 # meters per pixel in x dimension
    
    left_curverad, right_curverad, center_dist = (0, 0, 0)
    # Define y-value where we want radius of curvature
    # I'll choose the maximum y-value, corresponding to the bottom of the image
    h = bin_img.shape[0]
    ploty = np.linspace(0, h-1, h)
    y_eval = np.max(ploty)
  
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = bin_img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Again, extract left and right line pixel positions
    leftx = nonzerox[l_lane_inds]
    lefty = nonzeroy[l_lane_inds] 
    rightx = nonzerox[r_lane_inds]
    righty = nonzeroy[r_lane_inds]
    
    if len(leftx) != 0 and len(rightx) != 0:
        # Fit new polynomials to x,y in world space
        left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
        right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)
        # Calculate the new radii of curvature
        left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
        right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
        
    # Distance from center is image x midpoint - mean of l_fit and r_fit intercepts 
    if r_fit is not None and l_fit is not None:
        car_position = bin_img.shape[1]/2
        l_fit_x_int = l_fit[0]*h**2 + l_fit[1]*h + l_fit[2]
        r_fit_x_int = r_fit[0]*h**2 + r_fit[1]*h + r_fit[2]
        lane_center_position = (r_fit_x_int + l_fit_x_int) /2
        center_dist = (car_position - lane_center_position) * xm_per_pix
    return left_curverad, right_curverad, center_dist

# Define method to fit polynomial to binary image based upon a previous fit (chronologically speaking);
# this assumes that the fit will not change significantly from one video frame to the next
def polyfit_using_prev_fit(binary_warped, left_fit_prev, right_fit_prev):
    margin = 50
    # Grab activated pixels
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    
    left_lane_inds = ((nonzerox > (left_fit_prev[0]*(nonzeroy**2) + left_fit_prev[1]*nonzeroy + 
                    left_fit_prev[2] - margin)) & (nonzerox < (left_fit_prev[0]*(nonzeroy**2) + 
                    left_fit_prev[1]*nonzeroy + left_fit_prev[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit_prev[0]*(nonzeroy**2) + right_fit_prev[1]*nonzeroy + 
                    right_fit_prev[2] - margin)) & (nonzerox < (right_fit_prev[0]*(nonzeroy**2) + 
                    right_fit_prev[1]*nonzeroy + right_fit_prev[2] + margin)))
    
    # Again, extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds] 
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    
    left_fit_new, right_fit_new = (None, None)
    if len(leftx) != 0:
        # Fit a second order polynomial to each
        left_fit_new = np.polyfit(lefty, leftx, 2)
    if len(rightx) != 0:
        right_fit_new = np.polyfit(righty, rightx, 2)
    return left_fit_new, right_fit_new, left_lane_inds, right_lane_inds

# Define method to fit polynomial to binary image with lines extracted, using sliding window
def sliding_window_polyfit(binary_warped):
    
    # Take a histogram of the bottom half of the image
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0) #WHC
    # Create an output image to draw on and visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))
    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    midpoint = np.int(histogram.shape[0]//2) #W/2
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    # HYPERPARAMETERS
    # Choose the number of sliding windows
    nwindows = 9
    # Set the width of the windows +/- margin
    margin = 100
    # Set minimum number of pixels found to recenter window
    minpix = 50
    
    # Set height of windows - based on nwindows above and image shape
    window_height = np.int(binary_warped.shape[0]//nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0]) #H
    nonzerox = np.array(nonzero[1]) #W
    # Current positions to be updated later for each window in nwindows
    leftx_current = leftx_base
    rightx_current = rightx_base

    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    
    # Rectangle data for visualization
    rectangle_data = []

    # Step through the windows one by one
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin

        rectangle_data.append((win_y_low, win_y_high, win_xleft_low, win_xleft_high, win_xright_low, win_xright_high))
        
        # Identify the nonzero pixels in x and y within the window #
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & 
        (nonzerox >= win_xleft_low) &  (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & 
        (nonzerox >= win_xright_low) &  (nonzerox < win_xright_high)).nonzero()[0]
        
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:        
            rightx_current = np.int(np.mean(nonzerox[good_right_inds]))

    # Concatenate the arrays of indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)

    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds] 
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds] 

    left_fit, right_fit = (None, None)
    # Fit a second order polynomial to each
    if len(leftx) != 0:
        left_fit = np.polyfit(lefty, leftx, 2)
    if len(rightx) != 0:
        right_fit = np.polyfit(righty, rightx, 2)
    
    visualization_data = (rectangle_data, histogram)
    
    return left_fit, right_fit, left_lane_inds, right_lane_inds, visualization_data

process project_video.mp4

In [29]:
l_line = Line()
r_line = Line()
video_output1 = 'project_video_output.mp4'
video_input1 = VideoFileClip('./project_video.mp4')#.subclip(0,10)
processed_video = video_input1.fl_image(process_image)
%time processed_video.write_videofile(video_output1, audio=False)
[MoviePy] >>>> Building video project_video_output.mp4
[MoviePy] Writing video project_video_output.mp4
100%|█████████████████████████████████████████████████████████████████████████████▉| 1260/1261 [04:59<00:00,  4.47it/s]
[MoviePy] Done.
[MoviePy] >>>> Video ready: project_video_output.mp4 

Wall time: 5min
In [30]:
from IPython.display import HTML
HTML("""
<video width="640" height="360" controls>
  <source src="{0}">
  
</video>
""".format('project_video_output.mp4'))
Out[30]:

process challenge_video.mp4

In [31]:
l_line = Line()
r_line = Line()
# video_output1 = 'project_video_output_diagnostic.mp4'
video_output1 = 'challenge_video_output.mp4'
video_input1 = VideoFileClip('./challenge_video.mp4')#.subclip(0,10)
processed_video = video_input1.fl_image(process_image)
%time processed_video.write_videofile(video_output1, audio=False)
[MoviePy] >>>> Building video challenge_video_output.mp4
[MoviePy] Writing video challenge_video_output.mp4
100%|████████████████████████████████████████████████████████████████████████████████| 485/485 [01:46<00:00,  4.52it/s]
[MoviePy] Done.
[MoviePy] >>>> Video ready: challenge_video_output.mp4 

Wall time: 1min 48s
In [32]:
from IPython.display import HTML
HTML("""
<video width="640" height="360" controls>
  <source src="{0}">
  
</video>
""".format('challenge_video_output.mp4'))
Out[32]:

process harder_challenge_video.mp4

In [33]:
l_line = Line()
r_line = Line()
video_output1 = 'harder_challenge_video_output.mp4'
video_input1 = VideoFileClip('./harder_challenge_video.mp4').subclip(0,10)
processed_video = video_input1.fl_image(process_image)
%time processed_video.write_videofile(video_output1, audio=False)
[MoviePy] >>>> Building video harder_challenge_video_output.mp4
[MoviePy] Writing video harder_challenge_video_output.mp4
100%|███████████████████████████████████████████████████████████████████████████████▋| 250/251 [01:06<00:00,  3.03it/s]
[MoviePy] Done.
[MoviePy] >>>> Video ready: harder_challenge_video_output.mp4 

Wall time: 1min 12s
In [34]:
from IPython.display import HTML
HTML("""
<video width="640" height="360" controls>
  <source src="{0}">
  
</video>
""".format('harder_challenge_video_output.mp4'))
Out[34]: